struct pt_regs *regs)
{ return 0; }
void flush_tlb_current_task(void)
-{ xen_tlb_flush_mask(current->mm->cpu_vm_mask); }
+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
void flush_tlb_mm(struct mm_struct * mm)
-{ xen_tlb_flush_mask(mm->cpu_vm_mask); }
+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-{ xen_invlpg_mask(vma->vm_mm->cpu_vm_mask, va); }
+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
void flush_tlb_all(void)
{ xen_tlb_flush_all(); }
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void xen_tlb_flush_mask(cpumask_t mask)
+void xen_tlb_flush_mask(cpumask_t *mask)
{
struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- op.cpuset = mask.bits;
+ op.cpuset = mask->bits;
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void xen_invlpg_mask(cpumask_t mask, unsigned long ptr)
+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
{
struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
op.cmd = MMUEXT_INVLPG_MULTI;
- op.cpuset = mask.bits;
+ op.cpuset = mask->bits;
op.linear_addr = ptr & PAGE_MASK;
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void xen_tlb_flush_mask(cpumask_t mask)
+void xen_tlb_flush_mask(cpumask_t *mask)
{
struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- op.cpuset = mask.bits[0];
+ op.cpuset = mask->bits;
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-void xen_invlpg_mask(cpumask_t mask, unsigned long ptr)
+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
{
struct mmuext_op op;
+ if ( cpus_empty(*mask) )
+ return;
op.cmd = MMUEXT_INVLPG_MULTI;
- op.cpuset = mask.bits[0];
+ op.cpuset = mask->bits;
op.linear_addr = ptr & PAGE_MASK;
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
-#if 0 /* XEN */
+#if 0 /* XEN: no lazy tlb */
unsigned cpu = smp_processor_id();
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
-#if 0 /* XEN */
+#if 0 /* XEN: no lazy tlb */
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
}
-#if 0 /* XEN */
+#if 0 /* XEN: no lazy tlb */
else {
per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
#endif
}
-#define deactivate_mm(tsk, mm) \
- asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+/*
+ * XEN: We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
+ * *much* faster this way, as no tlb flushes means much bigger wrpt batches.
+ */
+#define deactivate_mm(tsk, mm) do { \
+ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
+ if ((mm) && cpu_isset(smp_processor_id(), (mm)->cpu_vm_mask)) { \
+ cpu_clear(smp_processor_id(), (mm)->cpu_vm_mask); \
+ load_cr3(swapper_pg_dir); \
+ } \
+} while (0)
#define activate_mm(prev, next) do { \
switch_mm((prev),(next),NULL); \
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- if (mm == current->active_mm)
+ /* XEN: cpu_vm_mask is more accurate than active_mm. */
+ if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
- if (vma->vm_mm == current->active_mm)
+ /* XEN: cpu_vm_mask is more accurate than active_mm. */
+ if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- if (vma->vm_mm == current->active_mm)
+ /* XEN: cpu_vm_mask is more accurate than active_mm. */
+ if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
__flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- if (mm == current->active_mm)
+ /* XEN: cpu_vm_mask is more accurate than active_mm. */
+ if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
- if (vma->vm_mm == current->active_mm)
+ /* XEN: cpu_vm_mask is more accurate than active_mm. */
+ if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- if (vma->vm_mm == current->active_mm)
+ /* XEN: cpu_vm_mask is more accurate than active_mm. */
+ if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
__flush_tlb();
}
#include <linux/cpumask.h>
void xen_tlb_flush_all(void);
void xen_invlpg_all(unsigned long ptr);
-void xen_tlb_flush_mask(cpumask_t mask);
-void xen_invlpg_mask(cpumask_t mask, unsigned long ptr);
+void xen_tlb_flush_mask(cpumask_t *mask);
+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)